From 2ff253bcb673ece307074c6b20290ef0d37f2e8f Mon Sep 17 00:00:00 2001 From: Keir Fraser Date: Fri, 6 Mar 2009 18:54:09 +0000 Subject: [PATCH] Fix cpu selection at the time of vCPU allocation After cpu_[online/offline], the set bits in cpu_online_map may not be contiguous. Use cycle_cpu() to pick the next one. Signed-off-by: Xiaowei Yang --- xen/common/domctl.c | 2 +- xen/common/sched_credit.c | 15 +++------------ xen/include/xen/cpumask.h | 17 +++++++++++++++-- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/xen/common/domctl.c b/xen/common/domctl.c index df6954370a..23c2f4e529 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -433,7 +433,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl) cpu = (i == 0) ? default_vcpu0_location() : - (d->vcpu[i-1]->processor + 1) % num_online_cpus(); + cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map); if ( alloc_vcpu(d, i, cpu) == NULL ) goto maxvcpu_out; diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 3ba7d3e8cd..923cad0079 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -249,15 +249,6 @@ static struct csched_private csched_priv; static void csched_tick(void *_cpu); -static inline int -__cycle_cpu(int cpu, const cpumask_t *mask) -{ - int nxt = next_cpu(cpu, *mask); - if (nxt == NR_CPUS) - nxt = first_cpu(*mask); - return nxt; -} - static inline int __vcpu_on_runq(struct csched_vcpu *svc) { @@ -428,7 +419,7 @@ csched_cpu_pick(struct vcpu *vc) cpus_and(cpus, cpu_online_map, vc->cpu_affinity); cpu = cpu_isset(vc->processor, cpus) ? 
vc->processor - : __cycle_cpu(vc->processor, &cpus); + : cycle_cpu(vc->processor, cpus); ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) ); /* @@ -454,7 +445,7 @@ csched_cpu_pick(struct vcpu *vc) cpumask_t nxt_idlers; int nxt; - nxt = __cycle_cpu(cpu, &cpus); + nxt = cycle_cpu(cpu, cpus); if ( cpu_isset(cpu, cpu_core_map[nxt]) ) { @@ -1128,7 +1119,7 @@ csched_load_balance(int cpu, struct csched_vcpu *snext) while ( !cpus_empty(workers) ) { - peer_cpu = __cycle_cpu(peer_cpu, &workers); + peer_cpu = cycle_cpu(peer_cpu, workers); cpu_clear(peer_cpu, workers); /* diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h index a6f8e25942..23dfd09a14 100644 --- a/xen/include/xen/cpumask.h +++ b/xen/include/xen/cpumask.h @@ -38,6 +38,8 @@ * * int first_cpu(mask) Number lowest set bit, or NR_CPUS * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS + * int last_cpu(mask) Number highest set bit, or NR_CPUS + * int cycle_cpu(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set * CPU_MASK_ALL Initializer - all bits set @@ -225,12 +227,23 @@ static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits) #define last_cpu(src) __last_cpu(&(src), NR_CPUS) static inline int __last_cpu(const cpumask_t *srcp, int nbits) { - int cpu, pcpu = NR_CPUS; - for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp)) + int cpu, pcpu = nbits; + for (cpu = __first_cpu(srcp, nbits); + cpu < nbits; + cpu = __next_cpu(cpu, srcp, nbits)) pcpu = cpu; return pcpu; } +#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS) +static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits) +{ + int nxt = __next_cpu(n, srcp, nbits); + if (nxt == nbits) + nxt = __first_cpu(srcp, nbits); + return nxt; +} + #define cpumask_of_cpu(cpu) \ ({ \ typeof(_unused_cpumask_arg_) m; \ -- 2.30.2